int enable_intremap(struct iommu *iommu)
{
struct ir_ctrl *ir_ctrl;
- u32 sts;
+ u32 sts, gcmd;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
/* set SIRTP */
- iommu->gcmd |= DMA_GCMD_SIRTP;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
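+ /* Build the command value from the live GSTS contents rather than a cached copy of GCMD. */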
+ gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ gcmd |= DMA_GCMD_SIRTP;
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_SIRTPS), sts);
/* enable compatibility format interrupt pass through */
- iommu->gcmd |= DMA_GCMD_CFI;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
+ gcmd |= DMA_GCMD_CFI;
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_CFIS), sts);
/* enable interrupt remapping hardware */
- iommu->gcmd |= DMA_GCMD_IRE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
+ gcmd |= DMA_GCMD_IRE;
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
(sts & DMA_GSTS_IRES), sts);
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
- iommu->gcmd &= ~(DMA_GCMD_SIRTP | DMA_GCMD_CFI | DMA_GCMD_IRE);
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
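+ /* Read the current status and clear only the interrupt-remapping enable bit. */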
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_IRES), sts);
if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
return;
- val = iommu->gcmd | DMA_GCMD_WBF;
spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
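+ /* Issue the write-buffer flush on top of the status value just read back. */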
+ val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
static int iommu_set_root_entry(struct iommu *iommu)
{
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
spin_lock(&iommu->lock);
spin_unlock(&iommu->lock);
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg, DMAR_RTADDR_REG, iommu->root_maddr);
- cmd = iommu->gcmd | DMA_GCMD_SRTP;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
+
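+ /* Set SRTP on top of the current status to latch the new root table pointer. */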
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_SRTP);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
dprintk(XENLOG_INFO VTDPREFIX,
"iommu_enable_translation: iommu->reg = %p\n", iommu->reg);
spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
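+ /* Turn on translation (TE) relative to the current hardware state. */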
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_TE);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd &= ~ DMA_GCMD_TE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
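+ /* Clear TE while preserving the other bits reported in GSTS. */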
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_TE));
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
/* enable queued invalidation hardware */
- iommu->gcmd |= DMA_GCMD_QIE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
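+ /* Set QIE on top of the status value read back from hardware. */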
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_QIE);
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
- iommu->gcmd &= ~DMA_GCMD_QIE;
- dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
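+ /* Clear QIE while keeping the remaining status bits intact. */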
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
/* Make sure hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
struct list_head list;
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u32 index; /* Sequence number of iommu */
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
u32 nr_pt_levels;
u64 cap;
u64 ecap;